}
thash_purge_entries(vcpu, va, ps);
#endif
+
+ /* NOTE(review): defensive range check on the instruction-TR slot before
+  * the TR array is touched.  The emulation layer (vmx_emul_itr_i) is
+  * supposed to reject bad slots with a guest fault first, so reaching
+  * this panic indicates a hypervisor-side bug, not guest misbehavior.
+  * slot appears to be u64; "%ld" assumes u64 == unsigned long on ia64
+  * — confirm. */
+ if (slot >= NITRS) {
+ panic_domain(NULL, "bad itr.i slot (%ld)", slot);
+ return IA64_FAULT;
+ }
+
pte &= ~PAGE_FLAGS_RV_MASK;
vcpu_get_rr(vcpu, va, &rid);
rid = rid& RR_RID_MASK;
return IA64_FAULT;
}
#endif
+
+ /* NOTE(review): mirror of the NITRS check for the data-TR array.
+  * Out-of-range slots should already have been filtered by
+  * vmx_emul_itr_d, so panic the domain instead of writing past the
+  * dtr array bounds. */
+ if (slot >= NDTRS) {
+ panic_domain(NULL, "bad itr.d slot (%ld)", slot);
+ return IA64_FAULT;
+ }
+
pte &= ~PAGE_FLAGS_RV_MASK;
/* This is a bad workaround
static IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
{
u64 itir, ifa, pte, slot;
+ /* isr is now needed unconditionally by the slot-range check added
+  * below, so its declaration moves out of the VMAL_NO_FAULT_CHECK
+  * block (see the matching removal further down). */
+ ISR isr;
#ifdef VMAL_NO_FAULT_CHECK
IA64_PSR vpsr;
vpsr.val=vmx_vcpu_get_psr(vcpu);
illegal_op(vcpu);
return IA64_FAULT;
}
- ISR isr;
if ( vpsr.cpl != 0) {
/* Inject Privileged Operation fault into guest */
set_privileged_operation_isr (vcpu, 0);
}
#endif // VMAL_NO_FAULT_CHECK
+ /* Guest supplied a data-TR slot >= NDTRS: inject a Reserved
+  * Register/Field fault into the guest instead of letting
+  * vmx_vcpu_itr_d index past the dtr array.  This check is
+  * deliberately outside VMAL_NO_FAULT_CHECK so it runs in every
+  * build. */
+ if (slot >= NDTRS) {
+ isr.val = set_isr_ei_ni(vcpu);
+ isr.code = IA64_RESERVED_REG_FAULT;
+ vcpu_set_isr(vcpu, isr.val);
+ rsv_reg_field(vcpu);
+ return IA64_FAULT;
+ }
+
return (vmx_vcpu_itr_d(vcpu,slot,pte,itir,ifa));
}
static IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
{
u64 itir, ifa, pte, slot;
-#ifdef VMAL_NO_FAULT_CHECK
ISR isr;
+#ifdef VMAL_NO_FAULT_CHECK
+ /* The #ifdef is moved below "ISR isr;" so isr is declared in every
+  * build: the unconditional slot-range check added further down uses
+  * it.  Same change as in vmx_emul_itr_d. */
IA64_PSR vpsr;
vpsr.val=vmx_vcpu_get_psr(vcpu);
if ( vpsr.ic ) {
}
#endif // VMAL_NO_FAULT_CHECK
+ /* Guest supplied an instruction-TR slot >= NITRS: inject a Reserved
+  * Register/Field fault rather than index past the itr array in
+  * vmx_vcpu_itr_i.  Runs regardless of VMAL_NO_FAULT_CHECK. */
+ if (slot >= NITRS) {
+ isr.val = set_isr_ei_ni(vcpu);
+ isr.code = IA64_RESERVED_REG_FAULT;
+ vcpu_set_isr(vcpu, isr.val);
+ rsv_reg_field(vcpu);
+ return IA64_FAULT;
+ }
+
return (vmx_vcpu_itr_i(vcpu,slot,pte,itir,ifa));
}